# Importing Necessary Libraries
import cv2
import os
import shutil
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# import shutil
# # Clear the existing mount point
# shutil.rmtree('/content/drive')
# Mount Google Drive so the shared dataset folder is reachable under /content/drive.
# (Colab-only; prompts for authorization on first run.)
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
# Function for Formatting Dataset
def FormatDataset(dataset_src, dataset_dest, classes):
    """Copy and normalize the raw dataset into ``dataset_dest``.

    For each class, CROPPED cell images are resized to 64x64 with a 1-pixel
    black border, and COMPLETE slide images to 256x256 with a 2-pixel black
    border; every image gets a light 2x2 box blur and is re-saved as .jpg.

    Parameters
    ----------
    dataset_src : str
        Root folder holding the original ``im_<class>/im_<class>`` directories.
    dataset_dest : str
        Root folder that receives ``<class>/CROPPED`` and ``<class>/COMPLETE``.
    classes : list[str]
        Class names to process.
    """
    new_cropped_dest = [os.path.join(dataset_dest, cls, 'CROPPED') for cls in classes]
    new_complete_dest = [os.path.join(dataset_dest, cls, 'COMPLETE') for cls in classes]
    # NOTE: the source layout nests each class twice: im_<cls>/im_<cls>[/CROPPED].
    cropped_src = [os.path.join(dataset_src, "im_" + cls, "im_" + cls, "CROPPED") for cls in classes]
    complete_src = [os.path.join(dataset_src, "im_" + cls, "im_" + cls) for cls in classes]
    for (dest1, dest2) in zip(new_cropped_dest, new_complete_dest):
        os.makedirs(dest1, exist_ok=True)
        os.makedirs(dest2, exist_ok=True)
    # Formatting cropped (single-cell) images: 64x64, 1-px border.
    for (src, new_dest) in zip(cropped_src, new_cropped_dest):
        _convert_bmp_folder(src, new_dest, size=(64, 64), border=1)
    # Formatting complete (whole-slide) images: 256x256, 2-px border.
    for (src, new_dest) in zip(complete_src, new_complete_dest):
        _convert_bmp_folder(src, new_dest, size=(256, 256), border=2)


def _convert_bmp_folder(src, dest, size, border):
    """Convert every .bmp in ``src`` to a resized, bordered, blurred .jpg in ``dest``."""
    for file in os.listdir(src):
        filename, file_ext = os.path.splitext(file)
        if file_ext.lower() != '.bmp':
            continue
        img = cv2.imread(os.path.join(src, file))
        if img is None:
            # Unreadable/corrupt file: skip instead of crashing cv2.resize.
            continue
        img = cv2.resize(img, size)
        img = cv2.copyMakeBorder(img, border, border, border, border,
                                 cv2.BORDER_CONSTANT, value=0)
        img = cv2.blur(img, (2, 2))
        cv2.imwrite(os.path.join(dest, filename + '.jpg'), img)
# Source Location for Dataset (raw im_<class> folders on the shared Drive)
src = '/content/drive/Shareddrives/Computer Vision Final Project'
# Destination Location for Dataset -- NOTE: nested inside the source folder,
# so re-running after new classes are added will not clash with the raw data.
dest = '/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer'
# Image Classes (the five cervical-cell categories in the dataset)
classes = ["Dyskeratotic", "Koilocytotic", "Metaplastic", "Parabasal", "Superficial-Intermediate"]
# Formatting Dataset (one-time conversion of .bmp sources to resized .jpg copies)
FormatDataset(src, dest, classes)
import os
import matplotlib.pyplot as plt
# Formatted dataset root on Drive and the five cervical-cell class names.
root_dir = "/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer"
classes = ["Dyskeratotic","Koilocytotic","Metaplastic","Parabasal","Superficial-Intermediate"]
def GetDatasetSize(path, classes, main="CROPPED"):
    """Count the images stored under ``path/<class>/<main>`` for each class.

    Returns a tuple ``(per_class_counts, total)`` where ``per_class_counts``
    maps each class name to its file count and ``total`` is their sum.
    """
    num_of_image = {
        cls: len(os.listdir(os.path.join(path, cls, main)))
        for cls in classes
    }
    return num_of_image, sum(num_of_image.values())
def plot_class_distribution(class_image_counts):
    """Bar-plot the number of images per class, color-coded by class group."""
    def _bar_color(name):
        # Dyskeratotic/Koilocytotic share one color, Metaplastic another,
        # and the remaining classes a third.
        if name in ("Dyskeratotic", "Koilocytotic"):
            return 'orange'
        if name == "Metaplastic":
            return 'yellow'
        return 'green'

    labels = list(class_image_counts)
    values = [class_image_counts[name] for name in labels]
    plt.figure(figsize=(10, 6))
    plt.bar(labels, values, color=[_bar_color(name) for name in labels])
    plt.xlabel('Class')
    plt.ylabel('Number of Images')
    plt.title('Number of Images per Class')
    plt.xticks(rotation=45)
    plt.show()
# Count and display the COMPLETE (whole-slide) images per class.
class_image_counts, total_images = GetDatasetSize(root_dir, classes, "COMPLETE")
print("COMPLETE")
print("Number of images per class:", class_image_counts)
print("Total number of images:", total_images)
# Plot the distribution
plot_class_distribution(class_image_counts)
COMPLETE
Number of images per class: {'Dyskeratotic': 223, 'Koilocytotic': 238, 'Metaplastic': 271, 'Parabasal': 108, 'Superficial-Intermediate': 126}
Total number of images: 966
# Count and display the CROPPED (single-cell) images per class.
class_image_counts, total_images = GetDatasetSize(root_dir, classes, "CROPPED")
print("CROPPED")
print("Number of images per class:", class_image_counts)
print("Total number of images:", total_images)
# Plot the distribution
plot_class_distribution(class_image_counts)
CROPPED
Number of images per class: {'Dyskeratotic': 813, 'Koilocytotic': 825, 'Metaplastic': 793, 'Parabasal': 787, 'Superficial-Intermediate': 831}
Total number of images: 4049
import os
import cv2
import matplotlib.pyplot as plt
def display_images(path, classes, main="CROPPED", num_images=5):
    """Show a grid of sample images: one row per class, ``num_images`` columns.

    Parameters
    ----------
    path : str
        Dataset root containing ``<class>/<main>`` sub-folders.
    classes : list[str]
        Class names (one grid row each).
    main : str
        Which variant to sample, "CROPPED" or "COMPLETE".
    num_images : int
        Number of images shown per class.
    """
    # squeeze=False keeps `axes` 2-D even for a single row/column, so the
    # axes[i, j] indexing below never breaks.
    fig, axes = plt.subplots(len(classes), num_images, figsize=(15, 15), squeeze=False)
    color_map = {
        "Dyskeratotic": "orange",
        "Koilocytotic": "orange",
        "Metaplastic": "yellow",
        "Parabasal": "green",
        "Superficial-Intermediate": "green"
    }
    for i, cls in enumerate(classes):
        cls_path = os.path.join(path, cls, main)
        # Sort for a deterministic selection -- os.listdir order is arbitrary.
        images = sorted(os.listdir(cls_path))[:num_images]
        for j, image_file in enumerate(images):
            img_path = os.path.join(cls_path, image_file)
            img = cv2.imread(img_path)
            axes[i, j].axis('off')
            if img is None:
                # Unreadable file: leave this grid cell blank instead of crashing.
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV is BGR; matplotlib expects RGB.
            axes[i, j].imshow(img)
            if j == 0:
                # Title only the first image in each row with the class name,
                # boxed in that class's group color.
                axes[i, j].set_title(cls, color='black',
                                     bbox=dict(facecolor=color_map.get(cls, 'green'),
                                               edgecolor='none', pad=5))
    plt.tight_layout()
    plt.show()
# Preview five COMPLETE (whole-slide) samples per class.
print("COMPLETE")
display_images(root_dir, classes, "COMPLETE", 5)
COMPLETE
# Preview five CROPPED (single-cell) samples per class.
print("CROPPED")
display_images(root_dir, classes, "CROPPED", 5)
CROPPED
import os
import shutil
import numpy as np
# Function for Creating Train / Validation / Test folders (One time use Only)
def TrainValTestSplit(root_dir, classes_dir, main="CROPPED", val_ratio=0.15, test_ratio=0.15, seed=None):
    """Split each class's images into train/val/test folders (one-time use).

    Images are copied (not moved) from ``root_dir/<cls>/<main>`` into
    ``root_dir/{train,val,test}/<cls>``. The split is
    ``1 - val_ratio - test_ratio`` / ``val_ratio`` / ``test_ratio``.

    Parameters
    ----------
    root_dir : str
        Dataset root containing the per-class source folders.
    classes_dir : list[str]
        Class names to split.
    main : str
        Which variant to split, "CROPPED" or "COMPLETE".
    val_ratio, test_ratio : float
        Fractions of each class reserved for validation and testing.
    seed : int | None
        Optional RNG seed so the random split is reproducible across runs.
    """
    if seed is not None:
        # Fix the shuffle order so repeated runs produce the same split.
        np.random.seed(seed)
    for cls in classes_dir:
        # Create train/val/test destination folders for this class.
        os.makedirs(os.path.join(root_dir, 'train', cls), exist_ok=True)
        os.makedirs(os.path.join(root_dir, 'val', cls), exist_ok=True)
        os.makedirs(os.path.join(root_dir, 'test', cls), exist_ok=True)
        # Folder to copy images from.
        src = os.path.join(root_dir, cls, main)
        # Shuffle the file names to randomize which images land in each split.
        allFileNames = os.listdir(src)
        np.random.shuffle(allFileNames)
        # Split at the (1 - val - test) and (1 - test) quantile indices.
        train_FileNames, val_FileNames, test_FileNames = np.split(
            np.array(allFileNames),
            [int(len(allFileNames) * (1 - (val_ratio + test_ratio))), int(len(allFileNames) * (1 - test_ratio))]
        )
        # Convert file names into full source paths.
        train_FileNames = [os.path.join(src, name) for name in train_FileNames.tolist()]
        val_FileNames = [os.path.join(src, name) for name in val_FileNames.tolist()]
        test_FileNames = [os.path.join(src, name) for name in test_FileNames.tolist()]
        # Report the split sizes for this class.
        print(cls, ':')
        print('Total images: ', len(allFileNames))
        print('Training: ', len(train_FileNames))
        print('Validation: ', len(val_FileNames))
        print('Testing: ', len(test_FileNames))
        # Copy each image into its split directory.
        for name in train_FileNames:
            shutil.copy(name, os.path.join(root_dir, 'train', cls))
        for name in val_FileNames:
            shutil.copy(name, os.path.join(root_dir, 'val', cls))
        for name in test_FileNames:
            shutil.copy(name, os.path.join(root_dir, 'test', cls))
        print()
# Performing Train / Validation / Test Split (70 / 15 / 15) on the CROPPED images.
root_dir = "/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer"
classes = ["Dyskeratotic", "Koilocytotic", "Metaplastic", "Parabasal", "Superficial-Intermediate"]
TrainValTestSplit(root_dir, classes)
Dyskeratotic : Total images: 813 Training: 569 Validation: 122 Testing: 122 Koilocytotic : Total images: 825 Training: 577 Validation: 124 Testing: 124 Metaplastic : Total images: 793 Training: 555 Validation: 119 Testing: 119 Parabasal : Total images: 787 Training: 550 Validation: 118 Testing: 119 Superficial-Intermediate : Total images: 831 Training: 581 Validation: 125 Testing: 125
import os
# Function to count number of images in each class directory for train, val, and test
def count_images_in_split_dirs(root_dir, classes):
    """Return ``{'train'|'val'|'test': {class: image count}}`` under root_dir.

    A split/class directory that does not exist counts as zero images.
    """
    counts = {}
    for split in ('train', 'val', 'test'):
        split_counts = {}
        for cls in classes:
            class_dir = os.path.join(root_dir, split, cls)
            # Missing directory simply contributes a zero count.
            split_counts[cls] = len(os.listdir(class_dir)) if os.path.exists(class_dir) else 0
        counts[split] = split_counts
    return counts
# Define the root directory and classes
root_dir = "/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer"
classes = ["Dyskeratotic", "Koilocytotic", "Metaplastic", "Parabasal", "Superficial-Intermediate"]
# Get the counts of images in each train/val/test class directory
image_counts = count_images_in_split_dirs(root_dir, classes)
# Print the counts, grouped by split
for split in image_counts:
    print(f"\n{split.upper()}:")
    for cls in image_counts[split]:
        print(f" {cls}: {image_counts[split][cls]} images")
TRAIN: Dyskeratotic: 569 images Koilocytotic: 577 images Metaplastic: 555 images Parabasal: 550 images Superficial-Intermediate: 581 images VAL: Dyskeratotic: 122 images Koilocytotic: 124 images Metaplastic: 119 images Parabasal: 118 images Superficial-Intermediate: 125 images TEST: Dyskeratotic: 122 images Koilocytotic: 124 images Metaplastic: 119 images Parabasal: 119 images Superficial-Intermediate: 125 images
# Importing Necessary Libraries
import cv2
import os
import shutil
import math
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# import shutil
# # Clear the existing mount point
# shutil.rmtree('/content/drive')
# Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
# Dataset root and class names, re-declared for the model-training session.
root_dir = "/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer"
classes = ["Dyskeratotic", "Koilocytotic", "Metaplastic", "Parabasal", "Superficial-Intermediate"]
# Importing Keras for Image Classification
import keras
from keras.layers import Dense,Conv2D, Flatten, MaxPool2D, Dropout
from keras.models import Sequential
from keras.preprocessing import image
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Load the ResNet50 model pre-trained on ImageNet, excluding the top layers.
# include_top=False drops ImageNet's 1000-way classifier; the input matches
# the 64x64 RGB images produced by the generators below.
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(64, 64, 3))
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 94765736/94765736 [==============================] - 0s 0us/step
# Add custom top layers: a VGG-style head of two 4096-unit ReLU layers,
# each followed by 50% dropout to curb overfitting.
x = base_model.output
x = Flatten()(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
# 5-way softmax output -- one unit per cervical-cell class.
predictions = Dense(5, activation='softmax')(x)
# Create the full model (ResNet50 backbone + custom classifier head)
model = Model(inputs=base_model.input, outputs=predictions)
# Compile the model; categorical_crossentropy matches the one-hot labels
# produced by class_mode='categorical' in the generators.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 64, 64, 3)] 0 []
conv1_pad (ZeroPadding2D) (None, 70, 70, 3) 0 ['input_1[0][0]']
conv1_conv (Conv2D) (None, 32, 32, 64) 9472 ['conv1_pad[0][0]']
conv1_bn (BatchNormalizati (None, 32, 32, 64) 256 ['conv1_conv[0][0]']
on)
conv1_relu (Activation) (None, 32, 32, 64) 0 ['conv1_bn[0][0]']
pool1_pad (ZeroPadding2D) (None, 34, 34, 64) 0 ['conv1_relu[0][0]']
pool1_pool (MaxPooling2D) (None, 16, 16, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2 (None, 16, 16, 64) 4160 ['pool1_pool[0][0]']
D)
conv2_block1_1_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block1_1_conv[0][0]']
rmalization)
conv2_block1_1_relu (Activ (None, 16, 16, 64) 0 ['conv2_block1_1_bn[0][0]']
ation)
conv2_block1_2_conv (Conv2 (None, 16, 16, 64) 36928 ['conv2_block1_1_relu[0][0]']
D)
conv2_block1_2_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block1_2_conv[0][0]']
rmalization)
conv2_block1_2_relu (Activ (None, 16, 16, 64) 0 ['conv2_block1_2_bn[0][0]']
ation)
conv2_block1_0_conv (Conv2 (None, 16, 16, 256) 16640 ['pool1_pool[0][0]']
D)
conv2_block1_3_conv (Conv2 (None, 16, 16, 256) 16640 ['conv2_block1_2_relu[0][0]']
D)
conv2_block1_0_bn (BatchNo (None, 16, 16, 256) 1024 ['conv2_block1_0_conv[0][0]']
rmalization)
conv2_block1_3_bn (BatchNo (None, 16, 16, 256) 1024 ['conv2_block1_3_conv[0][0]']
rmalization)
conv2_block1_add (Add) (None, 16, 16, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activati (None, 16, 16, 256) 0 ['conv2_block1_add[0][0]']
on)
conv2_block2_1_conv (Conv2 (None, 16, 16, 64) 16448 ['conv2_block1_out[0][0]']
D)
conv2_block2_1_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block2_1_conv[0][0]']
rmalization)
conv2_block2_1_relu (Activ (None, 16, 16, 64) 0 ['conv2_block2_1_bn[0][0]']
ation)
conv2_block2_2_conv (Conv2 (None, 16, 16, 64) 36928 ['conv2_block2_1_relu[0][0]']
D)
conv2_block2_2_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block2_2_conv[0][0]']
rmalization)
conv2_block2_2_relu (Activ (None, 16, 16, 64) 0 ['conv2_block2_2_bn[0][0]']
ation)
conv2_block2_3_conv (Conv2 (None, 16, 16, 256) 16640 ['conv2_block2_2_relu[0][0]']
D)
conv2_block2_3_bn (BatchNo (None, 16, 16, 256) 1024 ['conv2_block2_3_conv[0][0]']
rmalization)
conv2_block2_add (Add) (None, 16, 16, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activati (None, 16, 16, 256) 0 ['conv2_block2_add[0][0]']
on)
conv2_block3_1_conv (Conv2 (None, 16, 16, 64) 16448 ['conv2_block2_out[0][0]']
D)
conv2_block3_1_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block3_1_conv[0][0]']
rmalization)
conv2_block3_1_relu (Activ (None, 16, 16, 64) 0 ['conv2_block3_1_bn[0][0]']
ation)
conv2_block3_2_conv (Conv2 (None, 16, 16, 64) 36928 ['conv2_block3_1_relu[0][0]']
D)
conv2_block3_2_bn (BatchNo (None, 16, 16, 64) 256 ['conv2_block3_2_conv[0][0]']
rmalization)
conv2_block3_2_relu (Activ (None, 16, 16, 64) 0 ['conv2_block3_2_bn[0][0]']
ation)
conv2_block3_3_conv (Conv2 (None, 16, 16, 256) 16640 ['conv2_block3_2_relu[0][0]']
D)
conv2_block3_3_bn (BatchNo (None, 16, 16, 256) 1024 ['conv2_block3_3_conv[0][0]']
rmalization)
conv2_block3_add (Add) (None, 16, 16, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activati (None, 16, 16, 256) 0 ['conv2_block3_add[0][0]']
on)
conv3_block1_1_conv (Conv2 (None, 8, 8, 128) 32896 ['conv2_block3_out[0][0]']
D)
conv3_block1_1_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block1_1_conv[0][0]']
rmalization)
conv3_block1_1_relu (Activ (None, 8, 8, 128) 0 ['conv3_block1_1_bn[0][0]']
ation)
conv3_block1_2_conv (Conv2 (None, 8, 8, 128) 147584 ['conv3_block1_1_relu[0][0]']
D)
conv3_block1_2_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block1_2_conv[0][0]']
rmalization)
conv3_block1_2_relu (Activ (None, 8, 8, 128) 0 ['conv3_block1_2_bn[0][0]']
ation)
conv3_block1_0_conv (Conv2 (None, 8, 8, 512) 131584 ['conv2_block3_out[0][0]']
D)
conv3_block1_3_conv (Conv2 (None, 8, 8, 512) 66048 ['conv3_block1_2_relu[0][0]']
D)
conv3_block1_0_bn (BatchNo (None, 8, 8, 512) 2048 ['conv3_block1_0_conv[0][0]']
rmalization)
conv3_block1_3_bn (BatchNo (None, 8, 8, 512) 2048 ['conv3_block1_3_conv[0][0]']
rmalization)
conv3_block1_add (Add) (None, 8, 8, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activati (None, 8, 8, 512) 0 ['conv3_block1_add[0][0]']
on)
conv3_block2_1_conv (Conv2 (None, 8, 8, 128) 65664 ['conv3_block1_out[0][0]']
D)
conv3_block2_1_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block2_1_conv[0][0]']
rmalization)
conv3_block2_1_relu (Activ (None, 8, 8, 128) 0 ['conv3_block2_1_bn[0][0]']
ation)
conv3_block2_2_conv (Conv2 (None, 8, 8, 128) 147584 ['conv3_block2_1_relu[0][0]']
D)
conv3_block2_2_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block2_2_conv[0][0]']
rmalization)
conv3_block2_2_relu (Activ (None, 8, 8, 128) 0 ['conv3_block2_2_bn[0][0]']
ation)
conv3_block2_3_conv (Conv2 (None, 8, 8, 512) 66048 ['conv3_block2_2_relu[0][0]']
D)
conv3_block2_3_bn (BatchNo (None, 8, 8, 512) 2048 ['conv3_block2_3_conv[0][0]']
rmalization)
conv3_block2_add (Add) (None, 8, 8, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activati (None, 8, 8, 512) 0 ['conv3_block2_add[0][0]']
on)
conv3_block3_1_conv (Conv2 (None, 8, 8, 128) 65664 ['conv3_block2_out[0][0]']
D)
conv3_block3_1_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block3_1_conv[0][0]']
rmalization)
conv3_block3_1_relu (Activ (None, 8, 8, 128) 0 ['conv3_block3_1_bn[0][0]']
ation)
conv3_block3_2_conv (Conv2 (None, 8, 8, 128) 147584 ['conv3_block3_1_relu[0][0]']
D)
conv3_block3_2_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block3_2_conv[0][0]']
rmalization)
conv3_block3_2_relu (Activ (None, 8, 8, 128) 0 ['conv3_block3_2_bn[0][0]']
ation)
conv3_block3_3_conv (Conv2 (None, 8, 8, 512) 66048 ['conv3_block3_2_relu[0][0]']
D)
conv3_block3_3_bn (BatchNo (None, 8, 8, 512) 2048 ['conv3_block3_3_conv[0][0]']
rmalization)
conv3_block3_add (Add) (None, 8, 8, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activati (None, 8, 8, 512) 0 ['conv3_block3_add[0][0]']
on)
conv3_block4_1_conv (Conv2 (None, 8, 8, 128) 65664 ['conv3_block3_out[0][0]']
D)
conv3_block4_1_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block4_1_conv[0][0]']
rmalization)
conv3_block4_1_relu (Activ (None, 8, 8, 128) 0 ['conv3_block4_1_bn[0][0]']
ation)
conv3_block4_2_conv (Conv2 (None, 8, 8, 128) 147584 ['conv3_block4_1_relu[0][0]']
D)
conv3_block4_2_bn (BatchNo (None, 8, 8, 128) 512 ['conv3_block4_2_conv[0][0]']
rmalization)
conv3_block4_2_relu (Activ (None, 8, 8, 128) 0 ['conv3_block4_2_bn[0][0]']
ation)
conv3_block4_3_conv (Conv2 (None, 8, 8, 512) 66048 ['conv3_block4_2_relu[0][0]']
D)
conv3_block4_3_bn (BatchNo (None, 8, 8, 512) 2048 ['conv3_block4_3_conv[0][0]']
rmalization)
conv3_block4_add (Add) (None, 8, 8, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activati (None, 8, 8, 512) 0 ['conv3_block4_add[0][0]']
on)
conv4_block1_1_conv (Conv2 (None, 4, 4, 256) 131328 ['conv3_block4_out[0][0]']
D)
conv4_block1_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block1_1_conv[0][0]']
rmalization)
conv4_block1_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block1_1_bn[0][0]']
ation)
conv4_block1_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block1_1_relu[0][0]']
D)
conv4_block1_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block1_2_conv[0][0]']
rmalization)
conv4_block1_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block1_2_bn[0][0]']
ation)
conv4_block1_0_conv (Conv2 (None, 4, 4, 1024) 525312 ['conv3_block4_out[0][0]']
D)
conv4_block1_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block1_2_relu[0][0]']
D)
conv4_block1_0_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block1_0_conv[0][0]']
rmalization)
conv4_block1_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block1_3_conv[0][0]']
rmalization)
conv4_block1_add (Add) (None, 4, 4, 1024) 0 ['conv4_block1_0_bn[0][0]',
'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activati (None, 4, 4, 1024) 0 ['conv4_block1_add[0][0]']
on)
conv4_block2_1_conv (Conv2 (None, 4, 4, 256) 262400 ['conv4_block1_out[0][0]']
D)
conv4_block2_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block2_1_conv[0][0]']
rmalization)
conv4_block2_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block2_1_bn[0][0]']
ation)
conv4_block2_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block2_1_relu[0][0]']
D)
conv4_block2_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block2_2_conv[0][0]']
rmalization)
conv4_block2_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block2_2_bn[0][0]']
ation)
conv4_block2_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block2_2_relu[0][0]']
D)
conv4_block2_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block2_3_conv[0][0]']
rmalization)
conv4_block2_add (Add) (None, 4, 4, 1024) 0 ['conv4_block1_out[0][0]',
'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activati (None, 4, 4, 1024) 0 ['conv4_block2_add[0][0]']
on)
conv4_block3_1_conv (Conv2 (None, 4, 4, 256) 262400 ['conv4_block2_out[0][0]']
D)
conv4_block3_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block3_1_conv[0][0]']
rmalization)
conv4_block3_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block3_1_bn[0][0]']
ation)
conv4_block3_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block3_1_relu[0][0]']
D)
conv4_block3_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block3_2_conv[0][0]']
rmalization)
conv4_block3_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block3_2_bn[0][0]']
ation)
conv4_block3_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block3_2_relu[0][0]']
D)
conv4_block3_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block3_3_conv[0][0]']
rmalization)
conv4_block3_add (Add) (None, 4, 4, 1024) 0 ['conv4_block2_out[0][0]',
'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activati (None, 4, 4, 1024) 0 ['conv4_block3_add[0][0]']
on)
conv4_block4_1_conv (Conv2 (None, 4, 4, 256) 262400 ['conv4_block3_out[0][0]']
D)
conv4_block4_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block4_1_conv[0][0]']
rmalization)
conv4_block4_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block4_1_bn[0][0]']
ation)
conv4_block4_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block4_1_relu[0][0]']
D)
conv4_block4_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block4_2_conv[0][0]']
rmalization)
conv4_block4_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block4_2_bn[0][0]']
ation)
conv4_block4_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block4_2_relu[0][0]']
D)
conv4_block4_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block4_3_conv[0][0]']
rmalization)
conv4_block4_add (Add) (None, 4, 4, 1024) 0 ['conv4_block3_out[0][0]',
'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activati (None, 4, 4, 1024) 0 ['conv4_block4_add[0][0]']
on)
conv4_block5_1_conv (Conv2 (None, 4, 4, 256) 262400 ['conv4_block4_out[0][0]']
D)
conv4_block5_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block5_1_conv[0][0]']
rmalization)
conv4_block5_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block5_1_bn[0][0]']
ation)
conv4_block5_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block5_1_relu[0][0]']
D)
conv4_block5_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block5_2_conv[0][0]']
rmalization)
conv4_block5_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block5_2_bn[0][0]']
ation)
conv4_block5_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block5_2_relu[0][0]']
D)
conv4_block5_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block5_3_conv[0][0]']
rmalization)
conv4_block5_add (Add) (None, 4, 4, 1024) 0 ['conv4_block4_out[0][0]',
'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activati (None, 4, 4, 1024) 0 ['conv4_block5_add[0][0]']
on)
conv4_block6_1_conv (Conv2 (None, 4, 4, 256) 262400 ['conv4_block5_out[0][0]']
D)
conv4_block6_1_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block6_1_conv[0][0]']
rmalization)
conv4_block6_1_relu (Activ (None, 4, 4, 256) 0 ['conv4_block6_1_bn[0][0]']
ation)
conv4_block6_2_conv (Conv2 (None, 4, 4, 256) 590080 ['conv4_block6_1_relu[0][0]']
D)
conv4_block6_2_bn (BatchNo (None, 4, 4, 256) 1024 ['conv4_block6_2_conv[0][0]']
rmalization)
conv4_block6_2_relu (Activ (None, 4, 4, 256) 0 ['conv4_block6_2_bn[0][0]']
ation)
conv4_block6_3_conv (Conv2 (None, 4, 4, 1024) 263168 ['conv4_block6_2_relu[0][0]']
D)
conv4_block6_3_bn (BatchNo (None, 4, 4, 1024) 4096 ['conv4_block6_3_conv[0][0]']
rmalization)
conv4_block6_add (Add) (None, 4, 4, 1024) 0 ['conv4_block5_out[0][0]',
'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activati (None, 4, 4, 1024) 0 ['conv4_block6_add[0][0]']
on)
conv5_block1_1_conv (Conv2 (None, 2, 2, 512) 524800 ['conv4_block6_out[0][0]']
D)
conv5_block1_1_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block1_1_conv[0][0]']
rmalization)
conv5_block1_1_relu (Activ (None, 2, 2, 512) 0 ['conv5_block1_1_bn[0][0]']
ation)
conv5_block1_2_conv (Conv2 (None, 2, 2, 512) 2359808 ['conv5_block1_1_relu[0][0]']
D)
conv5_block1_2_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block1_2_conv[0][0]']
rmalization)
conv5_block1_2_relu (Activ (None, 2, 2, 512) 0 ['conv5_block1_2_bn[0][0]']
ation)
conv5_block1_0_conv (Conv2 (None, 2, 2, 2048) 2099200 ['conv4_block6_out[0][0]']
D)
conv5_block1_3_conv (Conv2 (None, 2, 2, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
D)
conv5_block1_0_bn (BatchNo (None, 2, 2, 2048) 8192 ['conv5_block1_0_conv[0][0]']
rmalization)
conv5_block1_3_bn (BatchNo (None, 2, 2, 2048) 8192 ['conv5_block1_3_conv[0][0]']
rmalization)
conv5_block1_add (Add) (None, 2, 2, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activati (None, 2, 2, 2048) 0 ['conv5_block1_add[0][0]']
on)
conv5_block2_1_conv (Conv2 (None, 2, 2, 512) 1049088 ['conv5_block1_out[0][0]']
D)
conv5_block2_1_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block2_1_conv[0][0]']
rmalization)
conv5_block2_1_relu (Activ (None, 2, 2, 512) 0 ['conv5_block2_1_bn[0][0]']
ation)
conv5_block2_2_conv (Conv2 (None, 2, 2, 512) 2359808 ['conv5_block2_1_relu[0][0]']
D)
conv5_block2_2_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block2_2_conv[0][0]']
rmalization)
conv5_block2_2_relu (Activ (None, 2, 2, 512) 0 ['conv5_block2_2_bn[0][0]']
ation)
conv5_block2_3_conv (Conv2 (None, 2, 2, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
D)
conv5_block2_3_bn (BatchNo (None, 2, 2, 2048) 8192 ['conv5_block2_3_conv[0][0]']
rmalization)
conv5_block2_add (Add) (None, 2, 2, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activati (None, 2, 2, 2048) 0 ['conv5_block2_add[0][0]']
on)
conv5_block3_1_conv (Conv2 (None, 2, 2, 512) 1049088 ['conv5_block2_out[0][0]']
D)
conv5_block3_1_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block3_1_conv[0][0]']
rmalization)
conv5_block3_1_relu (Activ (None, 2, 2, 512) 0 ['conv5_block3_1_bn[0][0]']
ation)
conv5_block3_2_conv (Conv2 (None, 2, 2, 512) 2359808 ['conv5_block3_1_relu[0][0]']
D)
conv5_block3_2_bn (BatchNo (None, 2, 2, 512) 2048 ['conv5_block3_2_conv[0][0]']
rmalization)
conv5_block3_2_relu (Activ (None, 2, 2, 512) 0 ['conv5_block3_2_bn[0][0]']
ation)
conv5_block3_3_conv (Conv2 (None, 2, 2, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
D)
conv5_block3_3_bn (BatchNo (None, 2, 2, 2048) 8192 ['conv5_block3_3_conv[0][0]']
rmalization)
conv5_block3_add (Add) (None, 2, 2, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activati (None, 2, 2, 2048) 0 ['conv5_block3_add[0][0]']
on)
flatten (Flatten) (None, 8192) 0 ['conv5_block3_out[0][0]']
dense (Dense) (None, 4096) 3355852 ['flatten[0][0]']
8
dropout (Dropout) (None, 4096) 0 ['dense[0][0]']
dense_1 (Dense) (None, 4096) 1678131 ['dropout[0][0]']
2
dropout_1 (Dropout) (None, 4096) 0 ['dense_1[0][0]']
dense_2 (Dense) (None, 5) 20485 ['dropout_1[0][0]']
==================================================================================================
Total params: 73948037 (282.09 MB)
Trainable params: 73894917 (281.89 MB)
Non-trainable params: 53120 (207.50 KB)
__________________________________________________________________________________________________
# Expand the size of dataset with new transformed images from the original dataset using ImageDataGenerator.
# Augmentation (zoom, shear, horizontal flip) is applied to training data only;
# val/test are just rescaled to [0, 1] so evaluation sees unmodified images.
train_datagen = image.ImageDataGenerator(zoom_range = 0.2, shear_range = 0.2 , rescale = 1./255 , horizontal_flip=True)
val_datagen = image.ImageDataGenerator(rescale = 1./255)
test_datagen = image.ImageDataGenerator(rescale = 1./255)
# flow_from_directory yields batches of (image, one-hot label) tensors read
# from the class sub-folders under root_dir/train:
# - images resized to 64x64 (the model's input size)
# - batches of 100
# - class_mode='categorical' => one-hot encoded labels
train_data = train_datagen.flow_from_directory(directory= root_dir + "/train", target_size=(64, 64), batch_size=100, class_mode = 'categorical')
Found 2832 images belonging to 5 classes.
# Mapping from class name to the integer label index assigned by the generator.
train_data.class_indices
{'Dyskeratotic': 0,
'Koilocytotic': 1,
'Metaplastic': 2,
'Parabasal': 3,
'Superficial-Intermediate': 4}
# Validation generator: rescale only, same 64x64 size and one-hot labels.
val_data = val_datagen.flow_from_directory(directory= root_dir + "/val", target_size=(64, 64), batch_size=100, class_mode = 'categorical')
Found 608 images belonging to 5 classes.
# Test generator: rescale only, same 64x64 size and one-hot labels.
test_data = test_datagen.flow_from_directory(directory= root_dir + "/test", target_size=(64, 64), batch_size=100, class_mode = 'categorical')
Found 609 images belonging to 5 classes.
# Adding Model check point Callback
# This callback is used during the training process to save the model weights. It monitors the validation accuracy and saves the model only if there is an improvement.
from tensorflow.keras.callbacks import ModelCheckpoint
# Define the full filepath for saving the best model (HDF5 format on Drive)
filepath = os.path.join(root_dir, "cervical_cancer_best_model_ResNet-50.hdf5")
# Adding Model Checkpoint Callback
mc = ModelCheckpoint(
filepath=filepath,
monitor='val_accuracy',
verbose=1, # When set to 1, the callback will print messages when the model is being saved.
save_best_only=True, # When set to True, the callback saves the model only when the monitored metric (val_accuracy) improves. This ensures that only the best model, in terms of validation accuracy, is saved.
mode='auto'
)
call_back = [mc]
# Train the model. With batch_size=100:
#   steps_per_epoch=28  -> 2800 training samples consumed per epoch
#   validation_steps=6  -> 600 validation samples evaluated per epoch
# NOTE(review): the training set holds 2832 images and the validation set
# 608, so 28 and 6 steps leave ~32 / 8 images unused each epoch — confirm
# this rounding-down is intentional.
cnn = model.fit(
    train_data,
    steps_per_epoch=28,
    epochs=64,
    validation_data=val_data,
    validation_steps=6,
    callbacks=call_back,
)
Epoch 1/64 28/28 [==============================] - ETA: 0s - loss: 3.2650 - accuracy: 0.6856 Epoch 1: val_accuracy improved from -inf to 0.20333, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 623s 21s/step - loss: 3.2650 - accuracy: 0.6856 - val_loss: 134.0110 - val_accuracy: 0.2033 Epoch 2/64 28/28 [==============================] - ETA: 0s - loss: 0.4542 - accuracy: 0.8712 Epoch 2: val_accuracy did not improve from 0.20333 28/28 [==============================] - 14s 413ms/step - loss: 0.4542 - accuracy: 0.8712 - val_loss: 4.7824 - val_accuracy: 0.1383 Epoch 3/64 28/28 [==============================] - ETA: 0s - loss: 0.4179 - accuracy: 0.8909 Epoch 3: val_accuracy did not improve from 0.20333 28/28 [==============================] - 11s 392ms/step - loss: 0.4179 - accuracy: 0.8909 - val_loss: 3.3701 - val_accuracy: 0.1900 Epoch 4/64 28/28 [==============================] - ETA: 0s - loss: 0.5150 - accuracy: 0.8459 Epoch 4: val_accuracy did not improve from 0.20333 28/28 [==============================] - 11s 381ms/step - loss: 0.5150 - accuracy: 0.8459 - val_loss: 18.4217 - val_accuracy: 0.1933 Epoch 5/64 28/28 [==============================] - ETA: 0s - loss: 0.3432 - accuracy: 0.8982 Epoch 5: val_accuracy did not improve from 0.20333 28/28 [==============================] - 11s 392ms/step - loss: 0.3432 - accuracy: 0.8982 - val_loss: 16.1079 - val_accuracy: 0.1950 Epoch 6/64 28/28 [==============================] - ETA: 0s - loss: 0.2432 - accuracy: 0.9154 Epoch 6: val_accuracy did not improve from 0.20333 28/28 [==============================] - 11s 391ms/step - loss: 0.2432 - accuracy: 0.9154 - val_loss: 16.7747 - val_accuracy: 0.1933 Epoch 7/64 28/28 [==============================] - ETA: 0s - loss: 0.2177 - accuracy: 0.9286 Epoch 7: val_accuracy did not improve from 0.20333 28/28 [==============================] - 11s 385ms/step 
- loss: 0.2177 - accuracy: 0.9286 - val_loss: 7.3999 - val_accuracy: 0.1900 Epoch 8/64 28/28 [==============================] - ETA: 0s - loss: 0.1532 - accuracy: 0.9510 Epoch 8: val_accuracy improved from 0.20333 to 0.24167, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 660ms/step - loss: 0.1532 - accuracy: 0.9510 - val_loss: 1.7790 - val_accuracy: 0.2417 Epoch 9/64 28/28 [==============================] - ETA: 0s - loss: 0.1991 - accuracy: 0.9319 Epoch 9: val_accuracy did not improve from 0.24167 28/28 [==============================] - 13s 424ms/step - loss: 0.1991 - accuracy: 0.9319 - val_loss: 7.3242 - val_accuracy: 0.1900 Epoch 10/64 28/28 [==============================] - ETA: 0s - loss: 0.1389 - accuracy: 0.9553 Epoch 10: val_accuracy did not improve from 0.24167 28/28 [==============================] - 11s 383ms/step - loss: 0.1389 - accuracy: 0.9553 - val_loss: 2.9678 - val_accuracy: 0.1950 Epoch 11/64 28/28 [==============================] - ETA: 0s - loss: 0.1643 - accuracy: 0.9520 Epoch 11: val_accuracy did not improve from 0.24167 28/28 [==============================] - 11s 372ms/step - loss: 0.1643 - accuracy: 0.9520 - val_loss: 2.1639 - val_accuracy: 0.1950 Epoch 12/64 28/28 [==============================] - ETA: 0s - loss: 0.1349 - accuracy: 0.9550 Epoch 12: val_accuracy did not improve from 0.24167 28/28 [==============================] - 11s 386ms/step - loss: 0.1349 - accuracy: 0.9550 - val_loss: 2.0321 - val_accuracy: 0.1900 Epoch 13/64 28/28 [==============================] - ETA: 0s - loss: 0.1199 - accuracy: 0.9597 Epoch 13: val_accuracy did not improve from 0.24167 28/28 [==============================] - 11s 382ms/step - loss: 0.1199 - accuracy: 0.9597 - val_loss: 1.7080 - val_accuracy: 0.2033 Epoch 14/64 28/28 [==============================] - ETA: 0s - loss: 0.1202 - accuracy: 0.9594 Epoch 14: val_accuracy 
improved from 0.24167 to 0.28500, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 19s 696ms/step - loss: 0.1202 - accuracy: 0.9594 - val_loss: 1.6064 - val_accuracy: 0.2850 Epoch 15/64 28/28 [==============================] - ETA: 0s - loss: 0.0932 - accuracy: 0.9740 Epoch 15: val_accuracy improved from 0.28500 to 0.30833, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 552ms/step - loss: 0.0932 - accuracy: 0.9740 - val_loss: 1.6012 - val_accuracy: 0.3083 Epoch 16/64 28/28 [==============================] - ETA: 0s - loss: 0.0912 - accuracy: 0.9674 Epoch 16: val_accuracy improved from 0.30833 to 0.33167, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 16s 521ms/step - loss: 0.0912 - accuracy: 0.9674 - val_loss: 1.5446 - val_accuracy: 0.3317 Epoch 17/64 28/28 [==============================] - ETA: 0s - loss: 0.1173 - accuracy: 0.9632 Epoch 17: val_accuracy did not improve from 0.33167 28/28 [==============================] - 13s 414ms/step - loss: 0.1173 - accuracy: 0.9632 - val_loss: 1.6697 - val_accuracy: 0.2267 Epoch 18/64 28/28 [==============================] - ETA: 0s - loss: 0.1021 - accuracy: 0.9656 Epoch 18: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 395ms/step - loss: 0.1021 - accuracy: 0.9656 - val_loss: 1.9251 - val_accuracy: 0.3267 Epoch 19/64 28/28 [==============================] - ETA: 0s - loss: 0.1108 - accuracy: 0.9700 Epoch 19: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 391ms/step - loss: 0.1108 - accuracy: 0.9700 - val_loss: 1.5243 - val_accuracy: 0.3067 Epoch 20/64 28/28 
[==============================] - ETA: 0s - loss: 0.3618 - accuracy: 0.9337 Epoch 20: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 389ms/step - loss: 0.3618 - accuracy: 0.9337 - val_loss: 7.2511 - val_accuracy: 0.1933 Epoch 21/64 28/28 [==============================] - ETA: 0s - loss: 0.2331 - accuracy: 0.9436 Epoch 21: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 378ms/step - loss: 0.2331 - accuracy: 0.9436 - val_loss: 2.1379 - val_accuracy: 0.3000 Epoch 22/64 28/28 [==============================] - ETA: 0s - loss: 0.4373 - accuracy: 0.8993 Epoch 22: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 405ms/step - loss: 0.4373 - accuracy: 0.8993 - val_loss: 2.1124 - val_accuracy: 0.2233 Epoch 23/64 28/28 [==============================] - ETA: 0s - loss: 0.3737 - accuracy: 0.9253 Epoch 23: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 400ms/step - loss: 0.3737 - accuracy: 0.9253 - val_loss: 2.4645 - val_accuracy: 0.2667 Epoch 24/64 28/28 [==============================] - ETA: 0s - loss: 0.2282 - accuracy: 0.9268 Epoch 24: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 380ms/step - loss: 0.2282 - accuracy: 0.9268 - val_loss: 7.9874 - val_accuracy: 0.1950 Epoch 25/64 28/28 [==============================] - ETA: 0s - loss: 0.1600 - accuracy: 0.9458 Epoch 25: val_accuracy did not improve from 0.33167 28/28 [==============================] - 11s 378ms/step - loss: 0.1600 - accuracy: 0.9458 - val_loss: 3.7202 - val_accuracy: 0.2550 Epoch 26/64 28/28 [==============================] - ETA: 0s - loss: 0.1278 - accuracy: 0.9619 Epoch 26: val_accuracy improved from 0.33167 to 0.46667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 626ms/step - 
loss: 0.1278 - accuracy: 0.9619 - val_loss: 1.3180 - val_accuracy: 0.4667 Epoch 27/64 28/28 [==============================] - ETA: 0s - loss: 0.1052 - accuracy: 0.9674 Epoch 27: val_accuracy improved from 0.46667 to 0.60833, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 541ms/step - loss: 0.1052 - accuracy: 0.9674 - val_loss: 0.9041 - val_accuracy: 0.6083 Epoch 28/64 28/28 [==============================] - ETA: 0s - loss: 0.0964 - accuracy: 0.9652 Epoch 28: val_accuracy improved from 0.60833 to 0.62667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 524ms/step - loss: 0.0964 - accuracy: 0.9652 - val_loss: 1.1273 - val_accuracy: 0.6267 Epoch 29/64 28/28 [==============================] - ETA: 0s - loss: 0.0857 - accuracy: 0.9693 Epoch 29: val_accuracy did not improve from 0.62667 28/28 [==============================] - 13s 393ms/step - loss: 0.0857 - accuracy: 0.9693 - val_loss: 1.5595 - val_accuracy: 0.5433 Epoch 30/64 28/28 [==============================] - ETA: 0s - loss: 0.0949 - accuracy: 0.9722 Epoch 30: val_accuracy improved from 0.62667 to 0.77500, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 659ms/step - loss: 0.0949 - accuracy: 0.9722 - val_loss: 0.6040 - val_accuracy: 0.7750 Epoch 31/64 28/28 [==============================] - ETA: 0s - loss: 0.0831 - accuracy: 0.9733 Epoch 31: val_accuracy did not improve from 0.77500 28/28 [==============================] - 13s 398ms/step - loss: 0.0831 - accuracy: 0.9733 - val_loss: 1.1937 - val_accuracy: 0.7233 Epoch 32/64 28/28 [==============================] - ETA: 0s - loss: 0.0702 - accuracy: 0.9791 Epoch 32: val_accuracy did 
not improve from 0.77500 28/28 [==============================] - 11s 387ms/step - loss: 0.0702 - accuracy: 0.9791 - val_loss: 0.8551 - val_accuracy: 0.7317 Epoch 33/64 28/28 [==============================] - ETA: 0s - loss: 0.0992 - accuracy: 0.9689 Epoch 33: val_accuracy improved from 0.77500 to 0.82833, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 636ms/step - loss: 0.0992 - accuracy: 0.9689 - val_loss: 0.4774 - val_accuracy: 0.8283 Epoch 34/64 28/28 [==============================] - ETA: 0s - loss: 0.0823 - accuracy: 0.9711 Epoch 34: val_accuracy improved from 0.82833 to 0.86333, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 518ms/step - loss: 0.0823 - accuracy: 0.9711 - val_loss: 0.5064 - val_accuracy: 0.8633 Epoch 35/64 28/28 [==============================] - ETA: 0s - loss: 0.1023 - accuracy: 0.9704 Epoch 35: val_accuracy improved from 0.86333 to 0.86667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 524ms/step - loss: 0.1023 - accuracy: 0.9704 - val_loss: 0.4030 - val_accuracy: 0.8667 Epoch 36/64 28/28 [==============================] - ETA: 0s - loss: 0.1072 - accuracy: 0.9788 Epoch 36: val_accuracy improved from 0.86667 to 0.87000, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 16s 518ms/step - loss: 0.1072 - accuracy: 0.9788 - val_loss: 0.4046 - val_accuracy: 0.8700 Epoch 37/64 28/28 [==============================] - ETA: 0s - loss: 0.0698 - accuracy: 0.9780 Epoch 37: val_accuracy did not improve from 0.87000 28/28 
[==============================] - 13s 396ms/step - loss: 0.0698 - accuracy: 0.9780 - val_loss: 1.3041 - val_accuracy: 0.7350 Epoch 38/64 28/28 [==============================] - ETA: 0s - loss: 0.0576 - accuracy: 0.9813 Epoch 38: val_accuracy improved from 0.87000 to 0.89167, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 642ms/step - loss: 0.0576 - accuracy: 0.9813 - val_loss: 0.4004 - val_accuracy: 0.8917 Epoch 39/64 28/28 [==============================] - ETA: 0s - loss: 0.0878 - accuracy: 0.9725 Epoch 39: val_accuracy improved from 0.89167 to 0.89667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 17s 551ms/step - loss: 0.0878 - accuracy: 0.9725 - val_loss: 0.3704 - val_accuracy: 0.8967 Epoch 40/64 28/28 [==============================] - ETA: 0s - loss: 0.0606 - accuracy: 0.9802 Epoch 40: val_accuracy improved from 0.89667 to 0.92000, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 16s 527ms/step - loss: 0.0606 - accuracy: 0.9802 - val_loss: 0.2797 - val_accuracy: 0.9200 Epoch 41/64 28/28 [==============================] - ETA: 0s - loss: 0.0614 - accuracy: 0.9802 Epoch 41: val_accuracy did not improve from 0.92000 28/28 [==============================] - 13s 401ms/step - loss: 0.0614 - accuracy: 0.9802 - val_loss: 0.5112 - val_accuracy: 0.8650 Epoch 42/64 28/28 [==============================] - ETA: 0s - loss: 0.0571 - accuracy: 0.9810 Epoch 42: val_accuracy did not improve from 0.92000 28/28 [==============================] - 11s 393ms/step - loss: 0.0571 - accuracy: 0.9810 - val_loss: 0.3114 - val_accuracy: 0.9133 Epoch 43/64 28/28 [==============================] - ETA: 0s - loss: 
0.0332 - accuracy: 0.9879 Epoch 43: val_accuracy improved from 0.92000 to 0.92167, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 19s 697ms/step - loss: 0.0332 - accuracy: 0.9879 - val_loss: 0.3456 - val_accuracy: 0.9217 Epoch 44/64 28/28 [==============================] - ETA: 0s - loss: 0.0381 - accuracy: 0.9876 Epoch 44: val_accuracy improved from 0.92167 to 0.92667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 16s 511ms/step - loss: 0.0381 - accuracy: 0.9876 - val_loss: 0.3255 - val_accuracy: 0.9267 Epoch 45/64 28/28 [==============================] - ETA: 0s - loss: 0.0835 - accuracy: 0.9747 Epoch 45: val_accuracy did not improve from 0.92667 28/28 [==============================] - 13s 391ms/step - loss: 0.0835 - accuracy: 0.9747 - val_loss: 0.3059 - val_accuracy: 0.9267 Epoch 46/64 28/28 [==============================] - ETA: 0s - loss: 0.0517 - accuracy: 0.9835 Epoch 46: val_accuracy improved from 0.92667 to 0.94167, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 638ms/step - loss: 0.0517 - accuracy: 0.9835 - val_loss: 0.2137 - val_accuracy: 0.9417 Epoch 47/64 28/28 [==============================] - ETA: 0s - loss: 0.0469 - accuracy: 0.9865 Epoch 47: val_accuracy did not improve from 0.94167 28/28 [==============================] - 13s 396ms/step - loss: 0.0469 - accuracy: 0.9865 - val_loss: 0.2654 - val_accuracy: 0.9267 Epoch 48/64 28/28 [==============================] - ETA: 0s - loss: 0.0672 - accuracy: 0.9817 Epoch 48: val_accuracy did not improve from 0.94167 28/28 [==============================] - 11s 400ms/step - loss: 0.0672 - accuracy: 0.9817 - val_loss: 0.3675 - 
val_accuracy: 0.9000 Epoch 49/64 28/28 [==============================] - ETA: 0s - loss: 0.0505 - accuracy: 0.9868 Epoch 49: val_accuracy improved from 0.94167 to 0.94667, saving model to /content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 19s 686ms/step - loss: 0.0505 - accuracy: 0.9868 - val_loss: 0.2346 - val_accuracy: 0.9467 Epoch 50/64 28/28 [==============================] - ETA: 0s - loss: 0.0715 - accuracy: 0.9810 Epoch 50: val_accuracy did not improve from 0.94667 28/28 [==============================] - 13s 399ms/step - loss: 0.0715 - accuracy: 0.9810 - val_loss: 23.1429 - val_accuracy: 0.7333 Epoch 51/64 28/28 [==============================] - ETA: 0s - loss: 0.0664 - accuracy: 0.9769 Epoch 51: val_accuracy did not improve from 0.94667 28/28 [==============================] - 11s 385ms/step - loss: 0.0664 - accuracy: 0.9769 - val_loss: 0.8457 - val_accuracy: 0.8517 Epoch 52/64 28/28 [==============================] - ETA: 0s - loss: 0.0682 - accuracy: 0.9799 Epoch 52: val_accuracy did not improve from 0.94667 28/28 [==============================] - 11s 382ms/step - loss: 0.0682 - accuracy: 0.9799 - val_loss: 0.2601 - val_accuracy: 0.9183 Epoch 53/64 28/28 [==============================] - ETA: 0s - loss: 0.0496 - accuracy: 0.9843 Epoch 53: val_accuracy did not improve from 0.94667 28/28 [==============================] - 11s 374ms/step - loss: 0.0496 - accuracy: 0.9843 - val_loss: 0.2636 - val_accuracy: 0.9267 Epoch 54/64 28/28 [==============================] - ETA: 0s - loss: 0.0440 - accuracy: 0.9865 Epoch 54: val_accuracy did not improve from 0.94667 28/28 [==============================] - 11s 379ms/step - loss: 0.0440 - accuracy: 0.9865 - val_loss: 0.2385 - val_accuracy: 0.9417 Epoch 55/64 28/28 [==============================] - ETA: 0s - loss: 0.0246 - accuracy: 0.9918 Epoch 55: val_accuracy improved from 0.94667 to 0.94833, saving model to 
/content/drive/Shareddrives/Computer Vision Final Project/CervicalCancer/cervical_cancer_best_model_ResNet-50.hdf5 28/28 [==============================] - 18s 636ms/step - loss: 0.0246 - accuracy: 0.9918 - val_loss: 0.2595 - val_accuracy: 0.9483 Epoch 56/64 28/28 [==============================] - ETA: 0s - loss: 0.2834 - accuracy: 0.9883 Epoch 56: val_accuracy did not improve from 0.94833 28/28 [==============================] - 13s 401ms/step - loss: 0.2834 - accuracy: 0.9883 - val_loss: 6.6538 - val_accuracy: 0.3650 Epoch 57/64 28/28 [==============================] - ETA: 0s - loss: 0.4093 - accuracy: 0.8821 Epoch 57: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 388ms/step - loss: 0.4093 - accuracy: 0.8821 - val_loss: 7460.0474 - val_accuracy: 0.1983 Epoch 58/64 28/28 [==============================] - ETA: 0s - loss: 0.4295 - accuracy: 0.9074 Epoch 58: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 379ms/step - loss: 0.4295 - accuracy: 0.9074 - val_loss: 101390.4141 - val_accuracy: 0.3233 Epoch 59/64 28/28 [==============================] - ETA: 0s - loss: 0.6900 - accuracy: 0.8939 Epoch 59: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 381ms/step - loss: 0.6900 - accuracy: 0.8939 - val_loss: 499.5102 - val_accuracy: 0.3667 Epoch 60/64 28/28 [==============================] - ETA: 0s - loss: 0.3896 - accuracy: 0.9198 Epoch 60: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 376ms/step - loss: 0.3896 - accuracy: 0.9198 - val_loss: 1086.9932 - val_accuracy: 0.3700 Epoch 61/64 28/28 [==============================] - ETA: 0s - loss: 0.3533 - accuracy: 0.9129 Epoch 61: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 382ms/step - loss: 0.3533 - accuracy: 0.9129 - val_loss: 856.5764 - val_accuracy: 0.3833 Epoch 62/64 28/28 [==============================] - ETA: 0s - 
loss: 0.2202 - accuracy: 0.9279 Epoch 62: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 378ms/step - loss: 0.2202 - accuracy: 0.9279 - val_loss: 64.6030 - val_accuracy: 0.4300 Epoch 63/64 28/28 [==============================] - ETA: 0s - loss: 0.1676 - accuracy: 0.9488 Epoch 63: val_accuracy did not improve from 0.94833 28/28 [==============================] - 11s 374ms/step - loss: 0.1676 - accuracy: 0.9488 - val_loss: 14.8945 - val_accuracy: 0.5650 Epoch 64/64 28/28 [==============================] - ETA: 0s - loss: 0.1218 - accuracy: 0.9586 Epoch 64: val_accuracy did not improve from 0.94833 28/28 [==============================] - 10s 362ms/step - loss: 0.1218 - accuracy: 0.9586 - val_loss: 6.0849 - val_accuracy: 0.6933
# Reload the checkpointed weights with the best validation accuracy.
model = load_model(root_dir + "/cervical_cancer_best_model_ResNet-50.hdf5")
# Evaluate on the held-out test set; evaluate() returns [loss, accuracy],
# so index 1 is the accuracy. Model.evaluate_generator is deprecated (and
# removed in recent TF 2.x) — Model.evaluate accepts generators directly.
accuracy = model.evaluate(test_data)[1]
print(f"The accuracy of your ResNet-50 model is = {accuracy*100} %")
The accuracy of your ResNet-50 model is = 93.43185424804688 %
# [1]: This accesses the second element of the returned list, which corresponds to the accuracy of the model. The first element ([0]) is the loss.
# Per-epoch training metrics: a dict with keys 'accuracy', 'val_accuracy',
# 'loss', 'val_loss'. (The stray trailing semicolons and the no-op
# `h.keys()` expression were removed.)
h = cnn.history

# Plotting Accuracy In Training Set & Validation Set
plt.plot(h['accuracy'], label='train accuracy')
plt.plot(h['val_accuracy'], c="red", label='val accuracy')
plt.title("acc vs v-acc")
plt.legend()
plt.show()

# Plotting Loss In Training Set & Validation Set
plt.plot(h['loss'], label='train loss')
plt.plot(h['val_loss'], c="red", label='val loss')
plt.title("loss vs v-loss")
plt.legend()
plt.show()
def cancerPrediction(path):
    """Predict the cervical-cell class of the image at ``path``.

    Loads the image at the 64x64 size the network was trained on,
    rescales pixels to [0, 1] (matching the generators' rescale=1./255),
    prints the predicted class name, and also returns it so callers can
    use the result programmatically (the return value is new and
    backward-compatible — previous callers ignored the None return).
    """
    # Label order must match train_data.class_indices
    # (alphabetical folder order assigned by flow_from_directory).
    classes_dir = ["Dyskeratotic", "Koilocytotic", "Metaplastic",
                   "Parabasal", "Superficial-Intermediate"]
    # Loading Image at the network's expected input size
    img = image.load_img(path, target_size=(64, 64))
    # Normalizing Image to [0, 1], same scale as training
    norm_img = image.img_to_array(img) / 255
    # Adding a leading batch dimension for predict()
    input_arr_img = np.array([norm_img])
    # Getting Predictions: index of the highest-probability class
    pred = np.argmax(model.predict(input_arr_img))
    # Printing Model Prediction
    print(classes_dir[pred])
    return classes_dir[pred]
# Sanity check: run a prediction on a known Dyskeratotic cropped image
# taken directly from the source dataset.
path = "/content/drive/Shareddrives/Computer Vision Final Project/im_Dyskeratotic/im_Dyskeratotic/CROPPED/002_04.bmp"
cancerPrediction(path)
1/1 [==============================] - 2s 2s/step Dyskeratotic
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
# Ground-truth integer labels, in the order the generator indexes its files.
# NOTE(review): this only lines up with model.predict's output order if
# test_data was created with shuffle=False — flow_from_directory defaults
# to shuffle=True, so confirm the test generator is unshuffled.
y_true = test_data.classes
# Predict the class-probability vector for every test image.
y_pred = model.predict(test_data)
# Convert probabilities to class labels using np.argmax
y_pred_classes = np.argmax(y_pred, axis=1)
# Per-class precision/recall/F1, labelled with the class folder names.
report = classification_report(y_true, y_pred_classes, target_names=test_data.class_indices.keys())
print("Classification Report:\n", report)
# Calculate and print the confusion matrix
cm = confusion_matrix(y_true, y_pred_classes)
print("Confusion Matrix:\n", cm)